%%timeit gamma_correction(img, 1.5)
# 96.8 ms ± 2.06 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)

%%timeit gamma_correction2(img, 1.5)
# 1.1 ms ± 282 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
# SIFT keypoint demo: detect keypoints on a grayscale copy, draw them
# (plain and rich markers), and inspect one keypoint's attributes.
img1 = cv2.imread('data/JD1.jpg')
gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)

sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, None)  # keypoints only, no descriptors yet

# Plain keypoint markers.
img_ = cv2.drawKeypoints(img1, kp, None)
show_(img_, 6)

# Rich markers: circle radius encodes keypoint size, line encodes orientation.
img_ = cv2.drawKeypoints(img1, kp, None,
                         flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
show_(img_, 6)

# Inspect one keypoint's attributes.
kp0 = kp[0]
print(kp0.angle)  # dominant orientation, degrees
print(kp0.pt)     # (x, y) location
print(kp0.size)   # diameter of the meaningful neighborhood

img2 = cv2.imread('data/jack2.jpg')
# BUG FIX: original called cv2.resize(img, ...) on the undefined/stale name
# `img`; the freshly loaded image is `img2`.
img2 = cv2.resize(img2, (600, 800))
# SURF feature demo: tune the Hessian threshold to control the number of
# keypoints, then enable extended (128-dim) descriptors.
img = cv2.imread('data/butterfly.jpg')

surf = cv2.xfeatures2d.SURF_create(400)
kp, des = surf.detectAndCompute(img, None)
print(f'keypoint-num: {len(kp)}')

# Too many keypoints — raise the Hessian threshold so only strong
# responses survive.
print(f'Hessian threshold: {surf.getHessianThreshold()}')
surf.setHessianThreshold(25000)

# Recompute with the stricter threshold and check the count again.
kp, des = surf.detectAndCompute(img, None)
print(f'keypoint-num: {len(kp)}')  # 47

img2 = cv2.drawKeypoints(img, kp, None, (0, 255, 255), 4)
show_(img, 8)
show_(img2, 8)

# Descriptor size is 64-dim by default; the "extended" flag switches
# SURF to 128-dim descriptors.
print(surf.descriptorSize())  # 64
print(surf.getExtended())     # False
surf.setExtended(True)

kp, des = surf.detectAndCompute(img, None)
print(surf.descriptorSize())  # 128
print(des.shape)              # (47, 128)
# Matching is covered later.
# STAR + BRIEF demo: detect keypoints with the STAR detector, then
# compute compact binary descriptors for them with BRIEF.
img = cv2.imread('data/corner.jpg', 0)  # flag 0: load as grayscale

star = cv2.xfeatures2d.StarDetector_create()
brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()

# Detect first, then describe; BRIEF may drop keypoints it cannot describe.
kp = star.detect(img, None)
kp, des = brief.compute(img, kp)

print(brief.descriptorSize())  # descriptor length in bytes
print(des.shape)
# SIFT matching with a brute-force kNN matcher plus Lowe's ratio test.
img1 = cv2.imread('data/JD1.jpg')
img2 = cv2.imread('data/JD2.jpg')

sift = cv2.xfeatures2d.SIFT_create()
# Keypoints and descriptors with SIFT.
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# Brute-force matcher, two nearest neighbours per query descriptor.
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only when its best distance clearly
# beats the second-best candidate.
good = [[m] for m, n in matches if m.distance < 0.75 * n.distance]

# cv2.drawMatchesKnn expects a list of lists of matches.
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                          flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
show_(img3, 10)
# SIFT matching with the FLANN (KD-tree) matcher; Lowe's ratio test is
# applied through a per-match draw mask instead of filtering the list.
img1 = cv2.imread('data/JD1.jpg', 0)
img2 = cv2.imread('data/JD2.jpg', 0)

sift = cv2.xfeatures2d.SIFT_create()
# Keypoints and descriptors with SIFT.
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

# FLANN KD-tree index; higher `checks` trades speed for accuracy.
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # or pass an empty dictionary

flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)

# Mask selecting which of the two candidates per query gets drawn;
# start with everything hidden.
matchesMask = [[0, 0] for _ in matches]

# Lowe's ratio test: reveal the best match when it clearly beats the
# second-best.
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]

draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=(255, 0, 0),
                   matchesMask=matchesMask,
                   flags=cv2.DrawMatchesFlags_DEFAULT)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
show_(img3, 10)